* boundary. Take into account buffer start offset. All other calls are
* conservative and always search the dma_map list if it's non-empty.
*/
- if ((((unsigned int)ptr & ~PAGE_MASK) + size) <= PAGE_SIZE) {
+ if (((((unsigned long)ptr) & ~PAGE_MASK) + size) <= PAGE_SIZE) {
dma = virt_to_bus(ptr);
} else {
BUG_ON((bnc = dma_alloc_coherent(dev, size, &dma, 0)) == NULL);
spin_unlock_irqrestore(&dma_map_lock, flags);
}
- if ((dma+size) & ~*hwdev->dma_mask)
+ if ((dma+size) & ~*dev->dma_mask)
out_of_line_bug();
return dma;
}
dma_addr_t dma_handle);
/*
 * NOTE(review): diff hunk — the map/unmap prototypes change the
 * `direction` parameter from a bare `int` to the type-safe
 * `enum dma_data_direction`. Callers passing the DMA_* direction
 * constants are source-compatible; the enum merely adds type checking.
 * First parameter is named `hwdev` here but `dev` in dma_unmap_single —
 * presumably harmless (prototype-only names), but worth unifying.
 */
extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
- int direction);
+ enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
- int direction);
+ enum dma_data_direction direction);
/*
 * Map one page for DMA by delegating to dma_map_single() on the page's
 * kernel virtual address plus the byte offset.
 * NOTE(review): page_address() only works for lowmem/directly-mapped
 * pages — assumes this platform has no highmem; confirm before reuse.
 */
#define dma_map_page(dev,page,offset,size,dir) \
dma_map_single((dev), page_address(page)+(offset), (size), (dir))
/*
 * NOTE(review): diff hunk — same int -> enum dma_data_direction
 * migration applied to the single-buffer sync prototypes, keeping them
 * consistent with dma_map_single()/dma_unmap_single() above.
 */
extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- int direction);
+ enum dma_data_direction direction);
extern void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- int direction);
+ enum dma_data_direction direction);
static inline void dma_sync_sg_for_cpu(struct device *hwdev,
struct scatterlist *sg,